From e1f6877b5384bd2bee9437849bdcfe5ecb605629 Mon Sep 17 00:00:00 2001
From: "kaf24@firebug.cl.cam.ac.uk"
Date: Thu, 28 Apr 2005 20:55:07 +0000
Subject: [PATCH] bitkeeper revision 1.1389.3.1 (42714dabVSywx2XWGjgw2J54ZylwYg)

Ensure block/yield hypercalls always return a sane return code.

Ensure callers of __enter_scheduler take appropriate arch-specific
action if no context switch occurs (callers from arch/x86 do not
expect to return from a call into the scheduler).

This fixes wildly unintuitive behaviour of do_block() for the VMX
team.

Signed-off-by: Keir Fraser
---
 xen/arch/ia64/xenmisc.c     | 5 +++++
 xen/arch/x86/domain.c       | 5 +++++
 xen/arch/x86/x86_32/entry.S | 8 +++++++-
 xen/arch/x86/x86_64/entry.S | 8 +++++++-
 xen/common/schedule.c       | 4 +++-
 xen/include/xen/sched.h     | 7 ++++++-
 6 files changed, 33 insertions(+), 4 deletions(-)

diff --git a/xen/arch/ia64/xenmisc.c b/xen/arch/ia64/xenmisc.c
index d740fe7aa9..b46e4f8f11 100644
--- a/xen/arch/ia64/xenmisc.c
+++ b/xen/arch/ia64/xenmisc.c
@@ -278,6 +278,11 @@ if (!i--) { printk("+",id); cnt[id] = 100; }
 	if (vcpu_timer_expired(current)) vcpu_pend_timer(current);
 }
 
+void continue_running(struct exec_domain *same)
+{
+	/* nothing to do */
+}
+
 void panic_domain(struct pt_regs *regs, const char *fmt, ...)
 {
 	va_list args;
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 322b200a3a..0cb953c1bb 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -794,7 +794,12 @@ void context_switch(struct exec_domain *prev, struct exec_domain *next)
     clear_bit(EDF_RUNNING, &prev->ed_flags);
 
     schedule_tail(next);
+    BUG();
+}
 
+void continue_running(struct exec_domain *same)
+{
+    schedule_tail(same);
     BUG();
 }
 
diff --git a/xen/arch/x86/x86_32/entry.S b/xen/arch/x86/x86_32/entry.S
index 60f154b270..8a36e7ba02 100644
--- a/xen/arch/x86/x86_32/entry.S
+++ b/xen/arch/x86/x86_32/entry.S
@@ -652,6 +652,12 @@ ENTRY(setup_vm86_frame)
         addl $16,%esp
         ret
 
+do_arch_sched_op:
+        # Ensure we return success even if we return via schedule_tail()
+        xorl %eax,%eax
+        movl %eax,UREGS_eax+4(%esp)
+        jmp SYMBOL_NAME(do_sched_op)
+
 do_switch_vm86:
         # Discard the return address
         addl $4,%esp
@@ -718,7 +724,7 @@ ENTRY(hypercall_table)
         .long SYMBOL_NAME(do_stack_switch)
         .long SYMBOL_NAME(do_set_callbacks)
         .long SYMBOL_NAME(do_fpu_taskswitch)     /* 5 */
-        .long SYMBOL_NAME(do_sched_op)
+        .long SYMBOL_NAME(do_arch_sched_op)
         .long SYMBOL_NAME(do_dom0_op)
         .long SYMBOL_NAME(do_set_debugreg)
         .long SYMBOL_NAME(do_get_debugreg)
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index b4280a1ff5..092828ac23 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -523,6 +523,12 @@ ENTRY(nmi)
         call SYMBOL_NAME(do_nmi)
         jmp  restore_all_xen
 
+do_arch_sched_op:
+        # Ensure we return success even if we return via schedule_tail()
+        xorl %eax,%eax
+        movq %rax,UREGS_rax+8(%rsp)
+        jmp SYMBOL_NAME(do_sched_op)
+
 .data
 
 ENTRY(exception_table)
@@ -554,7 +560,7 @@ ENTRY(hypercall_table)
         .quad SYMBOL_NAME(do_stack_switch)
         .quad SYMBOL_NAME(do_set_callbacks)
         .quad SYMBOL_NAME(do_fpu_taskswitch)     /* 5 */
-        .quad SYMBOL_NAME(do_sched_op)
+        .quad SYMBOL_NAME(do_arch_sched_op)
         .quad SYMBOL_NAME(do_dom0_op)
         .quad SYMBOL_NAME(do_set_debugreg)
         .quad SYMBOL_NAME(do_get_debugreg)
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 8062fdac73..4123231dd1 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -228,7 +228,9 @@ long do_block(void)
 
     /* Check for events /after/ blocking: avoids wakeup waiting race. */
     if ( event_pending(ed) )
+    {
         clear_bit(EDF_BLOCKED, &ed->ed_flags);
+    }
     else
     {
         TRACE_2D(TRC_SCHED_BLOCK, ed->domain->id, ed->eid);
@@ -382,7 +384,7 @@ static void __enter_scheduler(void)
     spin_unlock_irq(&schedule_data[cpu].schedule_lock);
 
     if ( unlikely(prev == next) )
-        return;
+        return continue_running(prev);
 
     perfc_incrc(sched_ctx);
 
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 915abaeddf..37ab3ff744 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -210,7 +210,7 @@ static inline void get_knownalive_domain(struct domain *d)
     atomic_inc(&d->refcnt);
     ASSERT(!(atomic_read(&d->refcnt) & DOMAIN_DESTRUCTED));
 }
- 
+
 extern struct domain *do_createdomain(
     domid_t dom_id, unsigned int cpu);
 extern int construct_dom0(
@@ -265,10 +265,15 @@ extern void sync_lazy_execstate_cpuset(unsigned long cpuset);
 extern void sync_lazy_execstate_all(void);
 extern int __sync_lazy_execstate(void);
 
+/* Called by the scheduler to switch to another exec_domain. */
 extern void context_switch(
     struct exec_domain *prev,
     struct exec_domain *next);
 
+/* Called by the scheduler to continue running the current exec_domain. */
+extern void continue_running(
+    struct exec_domain *same);
+
 void domain_init(void);
 
 int idle_cpu(int cpu); /* Is CPU 'cpu' idle right now? */
-- 
2.30.2
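
Editor's note: the following is an illustrative C sketch, not part of the patch above
or of the Xen tree. It restates in C why the do_arch_sched_op assembly wrapper
pre-loads the guest's saved return-value register with zero: if do_sched_op() blocks,
the guest is later resumed through schedule_tail(), and the normal hypercall return
path that would copy the C return value into the saved %eax/%rax never runs. The
cpu_user_regs layout, the get_guest_regs() helper, and the do_sched_op() stub shown
here are assumptions made only so the sketch compiles on its own.

/* Illustrative sketch only -- not the actual Xen entry.S/scheduler code. */

struct cpu_user_regs {
    unsigned long eax;   /* saved guest return-value register */
    /* ... other saved guest registers ... */
};

/* Hypothetical stand-in for "the current guest's saved register frame";
 * the real wrapper patches UREGS_eax/UREGS_rax on the stack in assembly. */
static struct cpu_user_regs guest_frame;
static struct cpu_user_regs *get_guest_regs(void) { return &guest_frame; }

/* Stub for the generic scheduler hypercall handler, so the sketch is
 * self-contained; the real handler may block and never return here. */
static long do_sched_op(unsigned long op) { (void)op; return 0; }

long do_arch_sched_op(unsigned long op)
{
    /*
     * Pre-store a success code in the guest's saved register frame.  If
     * do_sched_op() blocks and the guest is resumed via schedule_tail(),
     * the normal hypercall exit path is bypassed, so the zero written
     * here is what the guest actually observes as the hypercall result.
     */
    get_guest_regs()->eax = 0;

    return do_sched_op(op);
}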